# General Model Approach A - Univariate ----
# Load the tidyverse meta-package (dplyr, lubridate, ggplot2, ...)
library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr 1.1.4 ✔ readr 2.1.5
## ✔ forcats 1.0.0 ✔ stringr 1.5.1
## ✔ ggplot2 3.5.1 ✔ tibble 3.2.1
## ✔ lubridate 1.9.3 ✔ tidyr 1.3.1
## ✔ purrr 1.0.2
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag() masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
# scoringRules provides crps_sample() for CRPS evaluation
library(scoringRules)
library(ggplot2)
# Load the model data (daily peak forecasts: columns ds, y, yhat, residuals)
# Here: NeuralProphet
model <- read.csv("../data/forecasts/peaks_22-24_model-neuralprophet.csv")
# Parse the timestamp column as POSIXct in UTC
model$ds <- as.POSIXct(model$ds, tz = "UTC")
# Here: Arma
# model <- read.csv("./data/forecasts/peaks_22-24_model-arma.csv")
# model$ds <- as.POSIXct(model$ds, tz = "UTC")
# specify the length for the error learning phase in days
# NOTE(review): `length` shadows base::length as a variable. Function calls
# like length(x) still resolve to the base function, but a clearer name
# (e.g. learn_len) would avoid confusion.
length <- 365
# Filter only for the years 2023 (Training) and 2024 (Testing)
model <- model |>
filter((year(ds) == 2023) | (year(ds) == 2024))
getCRPS_A <- function(d) {
  # Compute the CRPS of the daily-peak forecast for day `d + length` using
  # approach A (univariate): learn an empirical distribution of the model's
  # residuals over the previous `length` days and shift it onto the point
  # forecast of the target day.
  #
  # Parameter: d ... index of the first day of the error-learning window;
  #                  the day that is scored is row `d + length` of `model`.
  # Depends on globals: `model` (columns yhat, y, residuals) and `length`
  # (the error-learning window in days).
  # Returns: the CRPS of the observed peak against the forecast sample.
  print(d)
  # --------- Error Learning Phase ---------
  # Note: only training OR testing data may be used here.
  # Take the `length`-day window of residuals starting at day d
  # (uses the global `length` instead of a hard-coded 364).
  df_train <- model[d:(d + length - 1), ]
  # Standardize the residuals
  mu_resid <- mean(df_train$residuals)
  sigma_resid <- sd(df_train$residuals)
  resid_std <- (df_train$residuals - mu_resid) / sigma_resid
  # Empirical distribution function of the standardized residuals.
  # Named ecdf_fn to avoid shadowing stats::ecdf.
  ecdf_fn <- ecdf(resid_std)
  # Inverse ECDF: probability p -> empirical quantile (vectorized over p)
  inverse_ecdf <- function(p) {
    quantile(ecdf_fn, p, names = FALSE)
  }
  print("Error Learning DONE...")
  # ------- Prediction phase -------
  # Number of quantile levels of the forecast distribution
  m <- 90
  # Equidistant probabilities 1/(m+1), ..., m/(m+1)
  quantiles <- seq(1 / (m + 1), m / (m + 1), 1 / (m + 1))
  # The day right after the error-learning window (testing data only!)
  df_test <- model[(d + length), ]
  # De-standardize the residual quantiles and shift them onto the point
  # forecast. Vectorized over all quantile levels at once; replaces the
  # fragile float-equality lookup loop of the original.
  peaks_dis_A <- data.frame(
    quantiles = quantiles,
    values = df_test$yhat[1] + (inverse_ecdf(quantiles) * sigma_resid) + mu_resid
  )
  print("Prediction DONE...")
  # Histogram of the forecast distribution of the daily peak
  hist(peaks_dis_A[, "values"],
    main = "Histogram of Peaks Distribution A",
    xlab = "Peaks", breaks = "Sturges"
  )
  # Observed peak of the test day
  peak <- df_test[, "y"]
  # CRPS score of the observation against the forecast sample
  crps_sample(peak, peaks_dis_A[, "values"])
}
# Rolling evaluation horizon in days
len_test <- 28
# Preallocate the result list, then score every day of the horizon
crps_scores <- vector("list", len_test)
for (d in seq_len(len_test)) {
  crps_scores[[d]] <- getCRPS_A(d)
}
## [1] 1
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 2
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 3
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 4
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 5
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 6
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 7
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 8
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 9
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 10
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 11
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 12
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 13
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 14
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 15
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 16
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 17
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 18
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 19
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 20
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 21
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 22
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 23
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 24
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 25
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 26
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 27
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 28
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

# Report the average CRPS over the 28-day rolling evaluation
print("Mean CRPS for 28 days in 2024 for NeuralProphet")
## [1] "Mean CRPS for 28 days in 2024 for NeuralProphet"
print(mean(unlist(crps_scores)))
## [1] 2777.669
# Same evaluation as above, now for the ARMA model
library(tidyverse)
library(scoringRules)
library(ggplot2)
# Load the model data (daily peak forecasts: columns ds, y, yhat, residuals)
# Here: NeuralProphet
# model <- read.csv("./data/forecasts/peaks_22-24_model-neuralprophet.csv")
# model$ds <- as.POSIXct(model$ds, tz = "UTC")
# Here: Arma
model <- read.csv("../data/forecasts/peaks_22-24_model-arma.csv")
# Parse the timestamp column as POSIXct in UTC
model$ds <- as.POSIXct(model$ds, tz = "UTC")
# specify the length for the error learning phase in days
# NOTE(review): `length` shadows base::length as a variable (see above)
length <- 365
# Filter only for the years 2023 (Training) and 2024 (Testing)
model <- model |>
filter((year(ds) == 2023) | (year(ds) == 2024))
getCRPS_A <- function(d) {
  # Compute the CRPS of the daily-peak forecast for day `d + length` using
  # approach A (univariate): learn an empirical distribution of the model's
  # residuals over the previous `length` days and shift it onto the point
  # forecast of the target day.
  #
  # Parameter: d ... index of the first day of the error-learning window;
  #                  the day that is scored is row `d + length` of `model`.
  # Depends on globals: `model` (columns yhat, y, residuals) and `length`
  # (the error-learning window in days).
  # Returns: the CRPS of the observed peak against the forecast sample.
  print(d)
  # --------- Error Learning Phase ---------
  # Note: only training OR testing data may be used here.
  # Take the `length`-day window of residuals starting at day d
  # (uses the global `length` instead of a hard-coded 364).
  df_train <- model[d:(d + length - 1), ]
  # Standardize the residuals
  mu_resid <- mean(df_train$residuals)
  sigma_resid <- sd(df_train$residuals)
  resid_std <- (df_train$residuals - mu_resid) / sigma_resid
  # Empirical distribution function of the standardized residuals.
  # Named ecdf_fn to avoid shadowing stats::ecdf.
  ecdf_fn <- ecdf(resid_std)
  # Inverse ECDF: probability p -> empirical quantile (vectorized over p)
  inverse_ecdf <- function(p) {
    quantile(ecdf_fn, p, names = FALSE)
  }
  print("Error Learning DONE...")
  # ------- Prediction phase -------
  # Number of quantile levels of the forecast distribution
  m <- 90
  # Equidistant probabilities 1/(m+1), ..., m/(m+1)
  quantiles <- seq(1 / (m + 1), m / (m + 1), 1 / (m + 1))
  # The day right after the error-learning window (testing data only!)
  df_test <- model[(d + length), ]
  # De-standardize the residual quantiles and shift them onto the point
  # forecast. Vectorized over all quantile levels at once; replaces the
  # fragile float-equality lookup loop of the original.
  peaks_dis_A <- data.frame(
    quantiles = quantiles,
    values = df_test$yhat[1] + (inverse_ecdf(quantiles) * sigma_resid) + mu_resid
  )
  print("Prediction DONE...")
  # Histogram of the forecast distribution of the daily peak
  hist(peaks_dis_A[, "values"],
    main = "Histogram of Peaks Distribution A",
    xlab = "Peaks", breaks = "Sturges"
  )
  # Observed peak of the test day
  peak <- df_test[, "y"]
  # CRPS score of the observation against the forecast sample
  crps_sample(peak, peaks_dis_A[, "values"])
}
# Rolling evaluation horizon in days
len_test <- 28
# Preallocate the result list, then score every day of the horizon
crps_scores <- vector("list", len_test)
for (d in seq_len(len_test)) {
  crps_scores[[d]] <- getCRPS_A(d)
}
## [1] 1
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 2
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 3
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 4
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 5
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 6
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 7
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 8
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 9
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 10
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 11
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 12
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 13
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 14
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 15
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 16
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 17
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 18
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 19
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 20
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 21
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 22
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 23
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 24
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 25
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 26
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 27
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

## [1] 28
## [1] "Error Learning DONE..."
## [1] "Prediction DONE..."

# Report the average CRPS over the 28-day rolling evaluation
print("Mean CRPS for 28 days in 2024 for ARMA")
## [1] "Mean CRPS for 28 days in 2024 for ARMA"
print(mean(unlist(crps_scores)))
## [1] 7875.091
# General Model Approach B - Multivariate ----
library(tidyverse)
library(scoringRules)
library(ggplot2)
# NOTE(review): copula is attached, but no copula:: functions are called
# below — the dependence structure is learned non-parametrically via rank().
library(copula)
##
## Attaching package: 'copula'
## The following object is masked from 'package:lubridate':
##
## interval
# This is a first general implementation of version B
# Load the model data (hourly load forecasts: columns ds, y, yhat, residuals)
# Here: NeuralProphet
model <- read.csv("../data/forecasts/load_22-24_model-neuralprophet_2024IsForecasted.csv")
# Parse the timestamp column as POSIXct in UTC
model$ds <- as.POSIXct(model$ds, tz = "UTC")
# Here: Arma
# model <- read.csv("./data/forecasts/load_22-24_model-arma.csv")
# model$ds <- as.POSIXct(model$ds, tz = "UTC")
# specify the length for the error learning phase in days
# NOTE(review): `length` shadows base::length as a variable (see above)
length <- 365
# Filter only for the years 2023 (Training) and 2024 (Testing)
model <- model |>
filter((year(ds) == 2023) | (year(ds) == 2024))
getCRPS_B <- function(d) {
  # Compute the CRPS of the daily-peak forecast for day `d + length` using
  # approach B (multivariate): learn one empirical error distribution per
  # hour of the day, couple the 24 hours through the rank structure of an
  # empirical copula (Schaake-shuffle style), and score the daily peak of
  # the resulting multivariate forecast scenarios.
  #
  # Parameter: d ... index of the first day of the error-learning window;
  #                  the day that is scored is day `d + length` of `model`.
  # Depends on globals: `model` (hourly data: ds, y, yhat, residuals) and
  # `length` (the error-learning window in days).
  # Returns: CRPS of the observed daily peak against the peak sample.
  print(d)
  # --------- Error Learning Phase ---------
  # Note: only training OR testing data may be used here.
  # Hourly rows of the `length`-day window starting at day d.
  # (Fixes an off-by-one: the original window ended one hour early, so
  # hour 23 had one fewer sample than the other hours.)
  df_train <- model[((d - 1) * 24 + 1):((d + length - 1) * 24), ]
  # Per-hour mean/sd of the residuals, plus one ECDF per hour
  mu_sigma <- data.frame(hour = 0:23, mu = rep(0, 24), sigma = rep(0, 24))
  ecdf_by_hour <- vector("list", 24)
  for (i in 0:23) {
    load_h <- df_train |>
      filter(hour(ds) == i)
    mu_sigma[i + 1, "mu"] <- mean(load_h$residuals)
    mu_sigma[i + 1, "sigma"] <- sd(load_h$residuals)
    # ECDF of the standardized residuals of hour i (stored in a list
    # instead of assign()/get() with constructed variable names)
    resid_std <- (load_h$residuals - mu_sigma[i + 1, "mu"]) / mu_sigma[i + 1, "sigma"]
    ecdf_by_hour[[i + 1]] <- ecdf(resid_std)
  }
  # Inverse ECDF: probability p (vectorized), hour i -> empirical quantile
  inverse_ecdf <- function(p, i) {
    quantile(ecdf_by_hour[[i + 1]], p, names = FALSE)
  }
  print("Error Learning DONE...")
  # ---- Dependence Learning Phase ----
  # Length m of the dependence-learning sample (also the number of
  # quantile levels used in the prediction phase)
  m <- 90
  # Training sample of the past m PIT values per hour to learn the copula
  X <- matrix(nrow = m, ncol = 0)
  for (i in 0:23) {
    load_h <- df_train |>
      filter(hour(ds) == i)
    epsilon <- as.vector(load_h$residuals)
    # Keep only the last m errors. (Fixes an off-by-one: the original
    # slice took m + 1 elements, which cbind() silently truncated with a
    # warning, dropping the most recent error.)
    epsilon <- epsilon[(length(epsilon) - m + 1):length(epsilon)]
    # Standardize, then map through the hour's ECDF to get PIT values
    epsilon_std <- (epsilon - mu_sigma[i + 1, "mu"]) / mu_sigma[i + 1, "sigma"]
    X <- cbind(X, ecdf_by_hour[[i + 1]](epsilon_std))
  }
  # Non-parametric approach: treat X as a sample from the empirical
  # copula; its column-wise ranks give the dependence template.
  # ties.method = "first" guarantees integer ranks in 1..m (the default
  # "average" can produce fractional ranks, which would be silently
  # truncated when used as matrix indices below).
  R_emp <- apply(X, 2, rank, ties.method = "first")
  print("Dependence Learning DONE...")
  # ------- Prediction phase -------
  # 24 univariate forecast distributions, one per hour, at m quantile levels
  univariate_forecast_t <- matrix(nrow = 24, ncol = m)
  quantiles <- seq(1 / (m + 1), m / (m + 1), 1 / (m + 1))
  # The 24 hours of the day right after the learning window (testing data
  # only!). (Fixes an off-by-one: the original window started at the last
  # hour of the previous day, misaligning hours and point forecasts.)
  df_test <- model[((d + length - 1) * 24 + 1):((d + length) * 24), ]
  predict_point <- df_test[, "yhat"]
  for (i in 0:23) {
    # De-standardized error quantiles shifted onto the point forecast
    univariate_forecast_t[i + 1, ] <- predict_point[i + 1] + mu_sigma[i + 1, "mu"] +
      inverse_ecdf(quantiles, i) * mu_sigma[i + 1, "sigma"]
  }
  # Pair the univariate quantiles via the copula rank template
  # (vectorized column assignment instead of a nested scalar loop)
  multivariate_forecast <- matrix(nrow = nrow(R_emp), ncol = ncol(R_emp))
  for (i in seq_len(24)) {
    multivariate_forecast[, i] <- univariate_forecast_t[i, R_emp[, i]]
  }
  print("Prediction DONE...")
  # Daily peak of every multivariate scenario
  peaks_dis_B <- apply(multivariate_forecast, MARGIN = 1, FUN = max)
  # Observed peak of the test day
  peak <- max(df_test[, "y"])
  hist(peaks_dis_B,
    main = "Histogram of Peaks Distribution B",
    xlab = "Peaks", breaks = "Sturges"
  )
  return(crps_sample(peak, peaks_dis_B))
}
# Rolling evaluation horizon in days
len_test <- 28
# Preallocate the result list, then score every day of the horizon
crps_scores <- vector("list", len_test)
for (d in seq_len(len_test)) {
  crps_scores[[d]] <- getCRPS_B(d)
}
## [1] 1
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 2
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 3
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 4
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 5
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 6
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 7
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 8
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 9
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 10
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 11
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 12
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 13
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 14
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 15
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 16
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 17
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 18
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 19
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 20
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 21
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 22
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 23
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 24
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 25
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 26
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 27
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 28
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

# Report the average CRPS over the 28-day rolling evaluation
print("Mean CRPS for 28 days in 2024 for NeuralProphet")
## [1] "Mean CRPS for 28 days in 2024 for NeuralProphet"
print(mean(unlist(crps_scores)))
## [1] 2916.514
# Same evaluation as above, now for the ARMA model
library(tidyverse)
library(scoringRules)
library(ggplot2)
# NOTE(review): copula is attached but unused below (dependence is learned
# non-parametrically via rank())
library(copula)
# This is a first general implementation of version B
# Load the model data (hourly load forecasts: columns ds, y, yhat, residuals)
# Here: NeuralProphet
# model <- read.csv("../data/forecasts/load_22-24_model-neuralprophet_2024IsForecasted.csv")
# model$ds <- as.POSIXct(model$ds, tz = "UTC")
# Here: Arma
model <- read.csv("../data/forecasts/load_22-24_model-arma.csv")
# Parse the timestamp column as POSIXct in UTC
model$ds <- as.POSIXct(model$ds, tz = "UTC")
# specify the length for the error learning phase in days
# NOTE(review): `length` shadows base::length as a variable (see above)
length <- 365
# Filter only for the years 2023 (Training) and 2024 (Testing)
model <- model |>
filter((year(ds) == 2023) | (year(ds) == 2024))
getCRPS_B <- function(d) {
  # Compute the CRPS of the daily-peak forecast for day `d + length` using
  # approach B (multivariate): learn one empirical error distribution per
  # hour of the day, couple the 24 hours through the rank structure of an
  # empirical copula (Schaake-shuffle style), and score the daily peak of
  # the resulting multivariate forecast scenarios.
  #
  # Parameter: d ... index of the first day of the error-learning window;
  #                  the day that is scored is day `d + length` of `model`.
  # Depends on globals: `model` (hourly data: ds, y, yhat, residuals) and
  # `length` (the error-learning window in days).
  # Returns: CRPS of the observed daily peak against the peak sample.
  print(d)
  # --------- Error Learning Phase ---------
  # Note: only training OR testing data may be used here.
  # Hourly rows of the `length`-day window starting at day d.
  # (Fixes an off-by-one: the original window ended one hour early, so
  # hour 23 had one fewer sample than the other hours.)
  df_train <- model[((d - 1) * 24 + 1):((d + length - 1) * 24), ]
  # Per-hour mean/sd of the residuals, plus one ECDF per hour
  mu_sigma <- data.frame(hour = 0:23, mu = rep(0, 24), sigma = rep(0, 24))
  ecdf_by_hour <- vector("list", 24)
  for (i in 0:23) {
    load_h <- df_train |>
      filter(hour(ds) == i)
    mu_sigma[i + 1, "mu"] <- mean(load_h$residuals)
    mu_sigma[i + 1, "sigma"] <- sd(load_h$residuals)
    # ECDF of the standardized residuals of hour i (stored in a list
    # instead of assign()/get() with constructed variable names)
    resid_std <- (load_h$residuals - mu_sigma[i + 1, "mu"]) / mu_sigma[i + 1, "sigma"]
    ecdf_by_hour[[i + 1]] <- ecdf(resid_std)
  }
  # Inverse ECDF: probability p (vectorized), hour i -> empirical quantile
  inverse_ecdf <- function(p, i) {
    quantile(ecdf_by_hour[[i + 1]], p, names = FALSE)
  }
  print("Error Learning DONE...")
  # ---- Dependence Learning Phase ----
  # Length m of the dependence-learning sample (also the number of
  # quantile levels used in the prediction phase)
  m <- 90
  # Training sample of the past m PIT values per hour to learn the copula
  X <- matrix(nrow = m, ncol = 0)
  for (i in 0:23) {
    load_h <- df_train |>
      filter(hour(ds) == i)
    epsilon <- as.vector(load_h$residuals)
    # Keep only the last m errors. (Fixes an off-by-one: the original
    # slice took m + 1 elements, which cbind() silently truncated with a
    # warning, dropping the most recent error.)
    epsilon <- epsilon[(length(epsilon) - m + 1):length(epsilon)]
    # Standardize, then map through the hour's ECDF to get PIT values
    epsilon_std <- (epsilon - mu_sigma[i + 1, "mu"]) / mu_sigma[i + 1, "sigma"]
    X <- cbind(X, ecdf_by_hour[[i + 1]](epsilon_std))
  }
  # Non-parametric approach: treat X as a sample from the empirical
  # copula; its column-wise ranks give the dependence template.
  # ties.method = "first" guarantees integer ranks in 1..m (the default
  # "average" can produce fractional ranks, which would be silently
  # truncated when used as matrix indices below).
  R_emp <- apply(X, 2, rank, ties.method = "first")
  print("Dependence Learning DONE...")
  # ------- Prediction phase -------
  # 24 univariate forecast distributions, one per hour, at m quantile levels
  univariate_forecast_t <- matrix(nrow = 24, ncol = m)
  quantiles <- seq(1 / (m + 1), m / (m + 1), 1 / (m + 1))
  # The 24 hours of the day right after the learning window (testing data
  # only!). (Fixes an off-by-one: the original window started at the last
  # hour of the previous day, misaligning hours and point forecasts.)
  df_test <- model[((d + length - 1) * 24 + 1):((d + length) * 24), ]
  predict_point <- df_test[, "yhat"]
  for (i in 0:23) {
    # De-standardized error quantiles shifted onto the point forecast
    univariate_forecast_t[i + 1, ] <- predict_point[i + 1] + mu_sigma[i + 1, "mu"] +
      inverse_ecdf(quantiles, i) * mu_sigma[i + 1, "sigma"]
  }
  # Pair the univariate quantiles via the copula rank template
  # (vectorized column assignment instead of a nested scalar loop)
  multivariate_forecast <- matrix(nrow = nrow(R_emp), ncol = ncol(R_emp))
  for (i in seq_len(24)) {
    multivariate_forecast[, i] <- univariate_forecast_t[i, R_emp[, i]]
  }
  print("Prediction DONE...")
  # Daily peak of every multivariate scenario
  peaks_dis_B <- apply(multivariate_forecast, MARGIN = 1, FUN = max)
  # Observed peak of the test day
  peak <- max(df_test[, "y"])
  hist(peaks_dis_B,
    main = "Histogram of Peaks Distribution B",
    xlab = "Peaks", breaks = "Sturges"
  )
  return(crps_sample(peak, peaks_dis_B))
}
# Rolling evaluation horizon in days
len_test <- 28
# Preallocate the result list, then score every day of the horizon
crps_scores <- vector("list", len_test)
for (d in seq_len(len_test)) {
  crps_scores[[d]] <- getCRPS_B(d)
}
## [1] 1
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 2
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 3
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 4
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 5
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 6
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 7
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 8
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 9
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 10
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 11
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 12
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 13
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 14
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 15
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 16
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 17
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 18
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 19
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 20
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 21
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 22
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 23
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 24
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 25
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 26
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

## [1] 27
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."
## [1] 28
## [1] "Error Learning DONE..."
## [1] "Dependence Learning DONE..."
## [1] "Prediction DONE..."

# Report the average CRPS over the 28-day rolling evaluation
print("Mean CRPS for 28 days in 2024 for ARMA")
## [1] "Mean CRPS for 28 days in 2024 for ARMA"
print(mean(unlist(crps_scores)))
## [1] 16453.73